library(tidyverse) # metapackage of all tidyverse packages
Registered S3 methods overwritten by 'dbplyr':
method from
print.tbl_lazy
print.tbl_sql
── Attaching packages ────────────────────────────────── tidyverse 1.3.0 ──
✓ ggplot2 3.3.3 ✓ purrr 0.3.4
✓ tibble 3.0.4 ✓ dplyr 1.0.2
✓ tidyr 1.1.2 ✓ stringr 1.4.0
✓ readr 1.4.0 ✓ forcats 0.5.0
── Conflicts ───────────────────────────────────── tidyverse_conflicts() ──
x dplyr::filter() masks stats::filter()
x dplyr::lag() masks stats::lag()
library(keras)
# Bind reticulate to the conda env that has Python/TensorFlow installed.
use_condaenv("r-reticulate")
# Load our model
# Load the previously trained cats-vs-dogs convnet from its HDF5 checkpoint.
model <- load_model_hdf5("cats_and_dogs_small_2.h5")
2021-04-11 21:52:31.172247: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2021-04-11 21:52:31.191172: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x7fd92eccf8d0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-04-11 21:52:31.191189: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
# Print the layer-by-layer architecture and parameter counts.
summary(model)
Model: "sequential_2"
___________________________________________________________________________
Layer (type) Output Shape Param #
===========================================================================
conv2d_10 (Conv2D) (None, 148, 148, 32) 896
___________________________________________________________________________
max_pooling2d_9 (MaxPooling2D) (None, 74, 74, 32) 0
___________________________________________________________________________
conv2d_9 (Conv2D) (None, 72, 72, 64) 18496
___________________________________________________________________________
max_pooling2d_8 (MaxPooling2D) (None, 36, 36, 64) 0
___________________________________________________________________________
conv2d_8 (Conv2D) (None, 34, 34, 128) 73856
___________________________________________________________________________
max_pooling2d_7 (MaxPooling2D) (None, 17, 17, 128) 0
___________________________________________________________________________
conv2d_7 (Conv2D) (None, 15, 15, 128) 147584
___________________________________________________________________________
max_pooling2d_6 (MaxPooling2D) (None, 7, 7, 128) 0
___________________________________________________________________________
flatten_2 (Flatten) (None, 6272) 0
___________________________________________________________________________
dropout (Dropout) (None, 6272) 0
___________________________________________________________________________
dense_5 (Dense) (None, 512) 3211776
___________________________________________________________________________
dense_4 (Dense) (None, 1) 513
===========================================================================
Total params: 3,453,121
Trainable params: 3,453,121
Non-trainable params: 0
___________________________________________________________________________
# Get the input image:
# Read one held-out test image, resize it to the model's 150x150 input,
# add a leading batch dimension, and scale pixels into [0, 1].
img_path <- "cats_and_dogs_small/test/cats/cat.1700.jpg"
img <- image_load(img_path, target_size = c(150, 150))
img_tensor <- img %>%
  image_to_array() %>%
  array_reshape(c(1, 150, 150, 3))
img_tensor <- img_tensor / 255
dim(img_tensor)
[1] 1 150 150 3
# Take a look at the image
# Render the input image (drop the batch dimension before rasterizing).
img_tensor[1, , , ] %>% as.raster() %>% plot()
# Now create the model. Using keras_model() instead of keras_model_sequential()
# allows us to access multiple output layers.
# Build a multi-output model mapping the input image to the activations of
# the first 8 layers (the conv/pool feature extractor). keras_model() (as
# opposed to keras_model_sequential()) supports multiple output tensors.
layer_outputs <- lapply(model$layers[1:8], function(layer) layer$output)
activation_model <- keras_model(inputs = model$input, outputs = layer_outputs)
# NOTE(review): the original `layer_outputs[[1]] %>% tensorflow::as.array()`
# errored ("as.array is not an exported object from namespace:tensorflow");
# a symbolic layer tensor cannot be converted to an array, so that line is
# removed.
# Run the feature extractor: returns a list of 8 activation arrays.
activations <- activation_model %>% predict(img_tensor)
str(activations)
List of 8
$ : num [1, 1:148, 1:148, 1:32] 0.00837 0.01414 0.00918 0.01291 0.00825 ...
$ : num [1, 1:74, 1:74, 1:32] 0.0141 0.0133 0.0178 0.021 0.0205 ...
$ : num [1, 1:72, 1:72, 1:64] 0 0 0 0 0 0 0 0 0 0 ...
$ : num [1, 1:36, 1:36, 1:64] 0 0 0 0 0 0 0 0 0 0 ...
$ : num [1, 1:34, 1:34, 1:128] 0.00386 0 0 0 0 ...
$ : num [1, 1:17, 1:17, 1:128] 0.00386 0 0 0.0131 0.00835 ...
$ : num [1, 1:15, 1:15, 1:128] 0.01514 0 0.01075 0.00422 0.01076 ...
$ : num [1, 1:7, 1:7, 1:128] 0.02181 0.01075 0.01471 0.00678 0.02665 ...
# Define the plotting function
# Draw a single 2-D activation channel as a false-colour image.
# The matrix is rotated (transpose of column-reversed input) so the
# channel displays upright with image().
plot_channel <- function(channel) {
  rotated <- t(apply(channel, 2, rev))
  image(rotated, axes = FALSE, asp = 1, col = topo.colors(12))
}
# Plot them all
# Plot every channel of every captured layer activation as a grid
# (images_per_row channels per row, one par() page per layer).
image_size <- 58
images_per_row <- 16
for (i in seq_along(activations)) {
  layer_activation <- activations[[i]]
  layer_name <- model$layers[[i]]$name
  # Number of channels in this layer's output (last array dimension).
  n_features <- dim(layer_activation)[[4]]
  n_cols <- n_features %/% images_per_row
  #png(paste0("cat_activations_", i, "_", layer_name, ".png"),
  #    width = image_size * images_per_row,
  #    height = image_size * n_cols)
  op <- par(mfrow = c(n_cols, images_per_row), mai = rep_len(0.02, 4))
  for (col in 0:(n_cols - 1)) {
    for (row in 0:(images_per_row - 1)) {
      channel_image <- layer_activation[1, , , (col * images_per_row) + row + 1]
      # plot_channel() draws as a side effect and returns NULL; the original
      # print() wrapper echoed "NULL" once per channel (hundreds of lines of
      # console noise), so it is removed.
      plot_channel(channel_image)
    }
  }
  par(op)
  #dev.off()
}
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
NULL
# Visualizing the filters
# Set up the loss function
# NOTE(review): the original cell was truncated to the bare fragment
# `weights = "imagenet",` (a parse error). Reconstructed from the later
# code, which requires a VGG16 `model` and a `loss` tensor: load VGG16
# with ImageNet weights (no classifier head) and define the loss as the
# mean activation of one filter in a chosen layer.
model <- application_vgg16(
  weights = "imagenet",
  include_top = FALSE
)
layer_name <- "block3_conv1"
filter_index <- 1
layer_output <- model$get_layer(layer_name)$output
loss <- k_mean(layer_output[, , , filter_index])
# Get the gradient of the loss with respect to the input, normalized by its RMS
# Gradient of the loss w.r.t. the input image, RMS-normalised (the small
# epsilon guards against division by zero).
grads <- k_gradients(loss, model$input)[[1]]
grads <- grads / (k_sqrt(k_mean(k_square(grads))) + 1e-5)
# iterate() evaluates loss and gradient for a concrete input batch.
iterate <- k_function(list(model$input), list(loss, grads))
# Smoke-test the function on an all-zero image.
zero_input <- array(0, dim = c(1, 150, 150, 3))
c(loss_value, grads_value) %<-% iterate(list(zero_input))
# Put it together into a loop
# Start from a near-grey image with small random noise, then run 40
# gradient-ascent steps that push the filter activation (loss) upward.
input_img_data <-
  array(runif(150 * 150 * 3), dim = c(1, 150, 150, 3)) * 20 + 128
step <- 1
for (iteration in seq_len(40)) {
  # Evaluate loss and gradient at the current image.
  c(loss_value, grads_value) %<-% iterate(list(input_img_data))
  cat("loss: ", loss_value, "\n")
  cat("grads_value: ", grads_value[1, 1:5, 1, 1], "\n")
  # Ascend: move the image in the direction that increases the loss.
  input_img_data <- input_img_data + (grads_value * step)
}
loss: 68.10915
grads_value: -0.02055234 0.0124106 0.06712954 0.07983842 0.09905355
loss: 148.9816
grads_value: -0.02367963 0.01027688 0.06084429 0.05776044 0.05121794
loss: 248.6013
grads_value: -0.02376306 -0.001260084 0.03469283 0.04041674 0.05864409
loss: 368.2984
grads_value: -0.02290497 0.001561245 0.039823 0.04660433 0.07228445
loss: 499.641
grads_value: -0.01960171 0.009522016 0.04265621 0.0478622 0.09018818
loss: 635.6437
grads_value: -0.02612916 -0.01237173 0.0002258365 0.0184661 0.1257058
loss: 773.5628
grads_value: 0.01547417 0.08030173 0.03711985 0.03134648 0.1461218
loss: 910.878
grads_value: 0.008887916 0.07132961 0.03514753 0.0262162 0.1337125
loss: 1047.179
grads_value: 0.007650015 0.06760189 0.04024948 0.05167056 0.1758872
loss: 1182.105
grads_value: 0.009264603 0.07076927 0.02188255 0.03188983 0.1244984
loss: 1315.951
grads_value: 0.01880095 0.08757914 0.02381711 0.01899291 0.1269091
loss: 1448.217
grads_value: 0.01859585 0.1026091 0.04526343 0.0266121 0.1345554
loss: 1579.036
grads_value: 0.04461221 0.2355232 0.2012401 0.1943541 0.376265
loss: 1708.379
grads_value: 0.06870421 0.2384596 0.1931398 0.1444489 0.2920649
loss: 1836.64
grads_value: 0.08103324 0.3586213 0.2994048 0.180763 0.3360555
loss: 1963.63
grads_value: 0.09606591 0.3835185 0.3049706 0.1680665 0.4143465
loss: 2089.055
grads_value: 0.04516215 0.3836091 0.3454892 0.1179104 0.3556658
loss: 2213.533
grads_value: 0.05075496 0.4168369 0.3764934 0.1279662 0.3292925
loss: 2336.879
grads_value: 0.04339589 0.4811139 0.410225 0.1536793 0.5119777
loss: 2459.118
grads_value: 0.03037276 0.5005596 0.3975278 0.05920871 0.3628269
loss: 2580.301
grads_value: 0.04027561 0.5302778 0.3662272 0.06176559 0.372285
loss: 2700.705
grads_value: 0.08396044 0.5741707 0.3705891 0.07760627 0.4446297
loss: 2820.42
grads_value: 0.09024354 0.6080343 0.4015453 0.1119668 0.5001155
loss: 2939.647
grads_value: 0.09923641 0.6195131 0.4084836 0.0895992 0.4790891
loss: 3058.214
grads_value: 0.1463298 0.6245549 0.3951112 0.1092209 0.5340443
loss: 3176.266
grads_value: 0.1509034 0.633091 0.3740603 0.05978949 0.4644986
loss: 3293.812
grads_value: 0.1764331 0.6503105 0.3870803 -0.0221213 0.3533002
loss: 3410.654
grads_value: 0.1931657 0.6750054 0.408454 0.05900301 0.4735953
loss: 3526.646
grads_value: 0.1990764 0.6735084 0.408569 0.03929114 0.4508763
loss: 3641.977
grads_value: 0.1652934 0.6494454 0.3820862 -0.01116045 0.3897472
loss: 3756.678
grads_value: 0.1833004 0.6597424 0.4220552 0.05023104 0.4164359
loss: 3870.92
grads_value: 0.1640288 0.6446196 0.3983006 -0.008581717 0.366553
loss: 3984.546
grads_value: 0.1870599 0.6434796 0.4097235 0.05200551 0.4438059
loss: 4097.761
grads_value: 0.1900522 0.6500507 0.4007178 0.02511647 0.39864
loss: 4210.431
grads_value: 0.2019599 0.6114587 0.3322644 0.07159135 0.4443951
loss: 4322.678
grads_value: 0.1743665 0.5909153 0.3214991 0.01573551 0.4171126
loss: 4434.57
grads_value: 0.1418489 0.5958902 0.2778648 -0.04889731 0.4553466
loss: 4545.955
grads_value: 0.08841515 0.5324376 0.1339925 -0.1929437 0.4394998
loss: 4656.972
grads_value: 0.07162858 0.5048009 0.1342231 -0.2020071 0.4424397
loss: 4767.597
grads_value: 0.08485744 0.488587 0.05986075 -0.2134876 0.4643567
# This is gradient ascent: we step in the direction that increases the loss
# (the filter activation), rather than decreasing it.
# Post-process the tensor so that we can display it as an image:
# Convert an arbitrary real-valued tensor into displayable pixel values:
# standardise to mean 0 and sd ~0.1, recentre at 0.5, clamp to [0, 1].
# The original dimensions are preserved.
deprocess_image <- function(x) {
  original_dims <- dim(x)
  scaled <- (x - mean(x)) / (sd(x) + 1e-5) * 0.1 + 0.5
  clamped <- pmin(pmax(scaled, 0), 1)
  array(clamped, dim = original_dims)
}
# Put it all together in a function
# Synthesise the input image that maximally activates one filter of a
# named layer, via gradient ascent in input space.
#
# Args:
#   layer_name:   name of a conv layer in the global `model` (e.g. VGG16's
#                 "block3_conv1").
#   filter_index: 1-based index of the filter to maximise.
#   size:         spatial size of the generated square image.
#   iterations:   number of gradient-ascent steps (default 40, as before).
#   step:         gradient-ascent step size (default 1, as before).
#
# Returns: a size x size x 3 array of values in [0, 1] (via deprocess_image).
generate_pattern <- function(layer_name, filter_index, size = 150,
                             iterations = 40, step = 1) {
  # Loss: mean activation of the chosen filter.
  layer_output <- model$get_layer(layer_name)$output
  loss <- k_mean(layer_output[, , , filter_index])
  # RMS-normalised gradient of the loss w.r.t. the input image.
  grads <- k_gradients(loss, model$input)[[1]]
  grads <- grads / (k_sqrt(k_mean(k_square(grads))) + 1e-5)
  iterate <- k_function(list(model$input), list(loss, grads))
  # Start from a near-grey image with small random noise.
  input_img_data <-
    array(runif(size * size * 3), dim = c(1, size, size, 3)) * 20 + 128
  for (i in seq_len(iterations)) {
    c(loss_value, grads_value) %<-% iterate(list(input_img_data))
    input_img_data <- input_img_data + (grads_value * step)
  }
  img <- input_img_data[1, , , ]
  deprocess_image(img)
}
library(grid)
# Render the pattern that maximises filter 1 of VGG16's block3_conv1.
grid.raster(generate_pattern("block3_conv1", 1))
library(grid)
library(gridExtra)
# Render an 8x8 grid of filter-maximising patterns for the first 64 filters
# of one conv layer in each of four VGG16 blocks, saving one PNG per layer.
# showWarnings = FALSE: re-running the script should not warn if the output
# directory already exists.
dir.create("vgg_filters", showWarnings = FALSE)
size <- 140  # loop-invariant; hoisted out of the per-layer loop
for (layer_name in c("block1_conv1", "block2_conv1",
                     "block3_conv1", "block4_conv1")) {
  png(paste0("vgg_filters/", layer_name, ".png"),
      width = 8 * size, height = 8 * size)
  grobs <- list()
  for (i in 0:7) {
    for (j in 0:7) {
      # Filters are laid out column-major: filter index = i + j*8 + 1.
      pattern <- generate_pattern(layer_name, i + (j * 8) + 1, size = size)
      grobs[[length(grobs) + 1]] <- rasterGrob(
        pattern,
        width = unit(0.9, "npc"),
        height = unit(0.9, "npc")
      )
    }
  }
  grid.arrange(grobs = grobs, ncol = 8)
  dev.off()
}